From 383ffeaa479215a8d5e6e612e1bcc257f7361490 Mon Sep 17 00:00:00 2001 From: "awilliam@xenbuild.aw" Date: Sun, 1 Oct 2006 11:19:45 -0600 Subject: [PATCH] [IA64] Fix a VTi physical mode bug When guest writes rr in physical mode, if it is rr0 or rr4, Xen can't write it into machine rr. Signed-off-by: Xuefei Xu --- xen/arch/ia64/vmx/vmx_phy_mode.c | 47 ++++++++++---------------------- xen/arch/ia64/vmx/vmx_vcpu.c | 17 ++++++++++-- 2 files changed, 29 insertions(+), 35 deletions(-) diff --git a/xen/arch/ia64/vmx/vmx_phy_mode.c b/xen/arch/ia64/vmx/vmx_phy_mode.c index 8745721d54..f86965abe1 100644 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c @@ -126,10 +126,16 @@ void vmx_init_all_rr(VCPU *vcpu) { VMX(vcpu, vrr[VRN0]) = 0x38; + // enable vhpt in guest physical mode + vcpu->arch.metaphysical_rr0 |= 1; + vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38); VMX(vcpu, vrr[VRN1]) = 0x38; VMX(vcpu, vrr[VRN2]) = 0x38; VMX(vcpu, vrr[VRN3]) = 0x38; VMX(vcpu, vrr[VRN4]) = 0x38; + // enable vhpt in guest physical mode + vcpu->arch.metaphysical_rr4 |= 1; + vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38); VMX(vcpu, vrr[VRN5]) = 0x38; VMX(vcpu, vrr[VRN6]) = 0x38; VMX(vcpu, vrr[VRN7]) = 0x738; @@ -141,11 +147,9 @@ void vmx_load_all_rr(VCPU *vcpu) { unsigned long psr; - ia64_rr phy_rr; local_irq_save(psr); - /* WARNING: not allow co-exist of both virtual mode and physical * mode in same region */ @@ -154,24 +158,16 @@ vmx_load_all_rr(VCPU *vcpu) panic_domain(vcpu_regs(vcpu), "Unexpected domain switch in phy emul\n"); } - phy_rr.rrval = vcpu->arch.metaphysical_rr0; - //phy_rr.ps = PAGE_SHIFT; - phy_rr.ve = 1; - - ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval); + ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0); ia64_dv_serialize_data(); - phy_rr.rrval = vcpu->arch.metaphysical_rr4; - //phy_rr.ps = PAGE_SHIFT; - phy_rr.ve = 1; - - ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval); + ia64_set_rr((VRN4 << VRN_SHIFT), 
vcpu->arch.metaphysical_rr4); ia64_dv_serialize_data(); } else { ia64_set_rr((VRN0 << VRN_SHIFT), - vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0]))); + vcpu->arch.metaphysical_saved_rr0); ia64_dv_serialize_data(); ia64_set_rr((VRN4 << VRN_SHIFT), - vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4]))); + vcpu->arch.metaphysical_saved_rr4); ia64_dv_serialize_data(); } @@ -209,21 +205,11 @@ void switch_to_physical_rid(VCPU *vcpu) { UINT64 psr; - ia64_rr phy_rr, mrr; - /* Save original virtual mode rr[0] and rr[4] */ psr=ia64_clear_ic(); - phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0; - mrr.rrval = ia64_get_rr(VRN0 << VRN_SHIFT); - phy_rr.ps = mrr.ps; - phy_rr.ve = 1; - ia64_set_rr(VRN0<<VRN_SHIFT, phy_rr.rrval); + ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0); ia64_srlz_d(); - phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4; - mrr.rrval = ia64_get_rr(VRN4 << VRN_SHIFT); - phy_rr.ps = mrr.ps; - phy_rr.ve = 1; - ia64_set_rr(VRN4<<VRN_SHIFT, phy_rr.rrval); + ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4); ia64_srlz_d(); ia64_set_psr(psr); @@ -236,15 +222,10 @@ void switch_to_virtual_rid(VCPU *vcpu) { UINT64 psr; - ia64_rr mrr; - psr=ia64_clear_ic(); - - vcpu_get_rr(vcpu,VRN0<<VRN_SHIFT,&mrr.rrval); - ia64_set_rr(VRN0<<VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval)); + ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0); ia64_srlz_d(); - vcpu_get_rr(vcpu,VRN4<<VRN_SHIFT,&mrr.rrval); - ia64_set_rr(VRN4<<VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval)); + ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4); ia64_srlz_d(); ia64_set_psr(psr); ia64_srlz_i(); diff --git a/xen/arch/ia64/vmx/vmx_vcpu.c b/xen/arch/ia64/vmx/vmx_vcpu.c index 3d9c65a7f0..ea78850a2f 100644 --- a/xen/arch/ia64/vmx/vmx_vcpu.c +++ b/xen/arch/ia64/vmx/vmx_vcpu.c @@ -212,19 +212,32 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val) { ia64_rr oldrr,newrr; extern void * pal_vaddr; + u64 rrval; vcpu_get_rr(vcpu, reg, &oldrr.rrval); newrr.rrval=val; if (newrr.rid >= (1 << vcpu->domain->arch.rid_bits)) panic_domain (NULL, "use of invalid rid %x\n", newrr.rid); - VMX(vcpu,vrr[reg>>61]) = val; - switch((u64)(reg>>61)) { + VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val; + switch((u64)(reg>>VRN_SHIFT)) { case VRN7: vmx_switch_rr7(vrrtomrr(vcpu,val),vcpu->domain->shared_info, (void *)vcpu->arch.privregs, (void *)vcpu->arch.vhpt.hash, pal_vaddr ); break; + case 
VRN4: + rrval = vrrtomrr(vcpu,val); + vcpu->arch.metaphysical_saved_rr4 = rrval; + if (!is_physical_mode(vcpu)) + ia64_set_rr(reg,rrval); + break; + case VRN0: + rrval = vrrtomrr(vcpu,val); + vcpu->arch.metaphysical_saved_rr0 = rrval; + if (!is_physical_mode(vcpu)) + ia64_set_rr(reg,rrval); + break; default: ia64_set_rr(reg,vrrtomrr(vcpu,val)); break; -- 2.30.2